obj-y += external.o
obj-y += float.o
obj-y += hcalls.o
-obj-y += htab.o
obj-y += iommu.o
obj-y += irq.o
obj-y += mambo.o
#include <xen/domain.h>
#include <xen/console.h>
#include <xen/shutdown.h>
+#include <xen/shadow.h>
#include <xen/mm.h>
#include <asm/htab.h>
#include <asm/current.h>
unsigned long rma_base;
unsigned long rma_sz;
uint rma_order_pages;
- uint htab_order_pages;
int rc;
if (d->domain_id == IDLE_DOMAIN_ID) {
d->arch.large_page_sizes = cpu_large_page_orders(
d->arch.large_page_order, ARRAY_SIZE(d->arch.large_page_order));
- /* FIXME: we need to the the maximum addressible memory for this
- * domain to calculate this correctly. It should probably be set
- * by the managment tools */
- htab_order_pages = rma_order_pages - 6; /* (1/64) */
- if (test_bit(_DOMF_privileged, &d->domain_flags)) {
- /* bump the htab size of privleged domains */
- ++htab_order_pages;
- }
- htab_alloc(d, htab_order_pages);
-
INIT_LIST_HEAD(&d->arch.extent_list);
return 0;
void arch_domain_destroy(struct domain *d)
{
- htab_free(d);
+ shadow_teardown(d);
}
void machine_halt(void)
{
memcpy(&v->arch.ctxt, &c->user_regs, sizeof(c->user_regs));
+ printf("Domain[%d].%d: initializing\n",
+ v->domain->domain_id, v->vcpu_id);
+
+ if (v->domain->arch.htab.order == 0)
+ panic("Page table never allocated for Domain: %d\n",
+ v->domain->domain_id);
+ if (v->domain->arch.rma_order == 0)
+ panic("RMA never allocated for Domain: %d\n",
+ v->domain->domain_id);
+
set_bit(_VCPUF_initialised, &v->vcpu_flags);
cpu_init_vcpu(v);
void continue_running(struct vcpu *same)
{
/* nothing to do */
+ return;
}
void sync_vcpu_execstate(struct vcpu *v)
{
- /* XXX for now, for domain destruction, make this non-fatal */
- printf("%s: called\n", __func__);
+ /* do nothing */
+ return;
}
void domain_relinquish_resources(struct domain *d)
#include <xen/init.h>
#include <xen/ctype.h>
#include <xen/iocap.h>
+#include <xen/shadow.h>
#include <xen/version.h>
#include <asm/processor.h>
#include <asm/papr.h>
#include "oftree.h"
-#define log2(x) ffz(~(x))
-
extern int parseelfimage_32(struct domain_setup_info *dsi);
extern int loadelfimage_32(struct domain_setup_info *dsi);
uint rma_nrpages = 1 << d->arch.rma_order;
ulong rma_sz = rma_size(d->arch.rma_order);
ulong rma = page_to_maddr(d->arch.rma_page);
- uint htab_order;
start_info_t *si;
ulong eomem;
int am64 = 1;
+ int preempt = 0;
ulong msr;
ulong pc;
ulong r2;
dom0_nrpages = allocate_extents(d, dom0_nrpages, rma_nrpages);
d->tot_pages = dom0_nrpages;
- ASSERT(d->tot_pages > 0);
-
- htab_order = log2(d->tot_pages) - 6;
- if (d->arch.htab.order > 0) {
- /* we incorrectly allocate this too early so lets adjust if
- * necessary */
- printk("WARNING: htab allocated to early\n");
- if (d->arch.htab.order < htab_order) {
- printk("WARNING: htab reallocated for more memory: 0x%x\n",
- htab_order);
- htab_free(d);
- htab_alloc(d, htab_order);
- }
+ ASSERT(d->tot_pages >= rma_nrpages);
+
+ if (opt_dom0_shadow == 0) {
+ /* 1/64 of memory */
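+ /* e.g. (assuming 4KiB pages, PAGE_SHIFT == 12): a 2GiB dom0 has
+ * tot_pages == 0x80000, so (0x80000 >> 6) >> 8 gives a 32MiB htab */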
+ opt_dom0_shadow = (d->tot_pages >> 6) >> (20 - PAGE_SHIFT);
}
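+
+ /* shadow_set_allocation() may report preemption; there is no guest
+ * context to continue from while building dom0, so simply retry
+ * until the allocation completes */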
+ do {
+ shadow_set_allocation(d, opt_dom0_shadow, &preempt);
+ } while (preempt);
+ if (shadow_get_allocation(d) == 0)
+ panic("shadow allocation failed 0x%x < 0x%x\n",
+ shadow_get_allocation(d), opt_dom0_shadow);
+
ASSERT( image_len < rma_sz );
si = (start_info_t *)(rma_addr(&d->arch, RMA_START_INFO) + rma);
+++ /dev/null
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- * Copyright (C) IBM Corp. 2005
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- */
-
-#include <xen/config.h>
-#include <xen/sched.h>
-
-static ulong htab_calc_sdr1(ulong htab_addr, ulong log_htab_size)
-{
- ulong sdr1_htabsize;
-
- ASSERT((htab_addr & ((1UL << log_htab_size) - 1)) == 0);
- ASSERT(log_htab_size <= SDR1_HTABSIZE_MAX);
- ASSERT(log_htab_size >= HTAB_MIN_LOG_SIZE);
-
- sdr1_htabsize = log_htab_size - LOG_PTEG_SIZE - SDR1_HTABSIZE_BASEBITS;
-
- return (htab_addr | (sdr1_htabsize & SDR1_HTABSIZE_MASK));
-}
-
-void htab_alloc(struct domain *d, uint order)
-{
- ulong htab_raddr;
- ulong log_htab_bytes = order + PAGE_SHIFT;
- ulong htab_bytes = 1UL << log_htab_bytes;
-
- /* XXX use alloc_domheap_pages instead? */
- htab_raddr = (ulong)alloc_xenheap_pages(order);
- ASSERT(htab_raddr != 0);
- /* XXX check alignment guarantees */
- ASSERT((htab_raddr & (htab_bytes - 1)) == 0);
-
- /* XXX slow. move memset out to service partition? */
- memset((void *)htab_raddr, 0, htab_bytes);
-
- d->arch.htab.order = order;
- d->arch.htab.log_num_ptes = log_htab_bytes - LOG_PTE_SIZE;
- d->arch.htab.sdr1 = htab_calc_sdr1(htab_raddr, log_htab_bytes);
- d->arch.htab.map = (union pte *)htab_raddr;
- d->arch.htab.shadow = xmalloc_array(ulong,
- 1UL << d->arch.htab.log_num_ptes);
- ASSERT(d->arch.htab.shadow != NULL);
-}
-
-void htab_free(struct domain *d)
-{
- ulong htab_raddr = GET_HTAB(d);
-
- free_xenheap_pages((void *)htab_raddr, d->arch.htab.order);
- xfree(d->arch.htab.shadow);
-}
-
#include <xen/shadow.h>
#include <public/dom0_ops.h>
+static ulong htab_calc_sdr1(ulong htab_addr, ulong log_htab_size)
+{
+ ulong sdr1_htabsize;
+
+ ASSERT((htab_addr & ((1UL << log_htab_size) - 1)) == 0);
+ ASSERT(log_htab_size <= SDR1_HTABSIZE_MAX);
+ ASSERT(log_htab_size >= HTAB_MIN_LOG_SIZE);
+
+ sdr1_htabsize = log_htab_size - LOG_PTEG_SIZE - SDR1_HTABSIZE_BASEBITS;
+
+ return (htab_addr | (sdr1_htabsize & SDR1_HTABSIZE_MASK));
+}
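+
+/* Worked example (assuming the usual PPC64 values LOG_PTEG_SIZE == 7,
+ * SDR1_HTABSIZE_BASEBITS == 11): a 16MiB htab has log_htab_size == 24,
+ * so the HTABSIZE field is 24 - 7 - 11 == 6, i.e. 2^(11 + 6) PTEGs,
+ * and sdr1 == htab_addr | 6 */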
+
+static ulong htab_alloc(struct domain *d, uint order)
+{
+ ulong htab_raddr;
+ uint log_htab_bytes;
+ ulong htab_bytes;
+
+ /* we use xenheap pages to keep domheap pages useful for domains */
+
+ if (order < 6)
+ order = 6; /* architectural minimum is 2^18 */
+ if (order > 34)
+ order = 34; /* architectural maximum is 2^46 */
+
+ /* compute sizes only after clamping so they match the real order */
+ log_htab_bytes = order + PAGE_SHIFT;
+ htab_bytes = 1UL << log_htab_bytes;
+
+ htab_raddr = (ulong)alloc_xenheap_pages(order);
+ if (htab_raddr > 0) {
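+ /* Xen's heap allocator hands back naturally aligned 2^order
+ * blocks, and the htab must be aligned to its own size, so the
+ * assertion below should always hold */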
+ ASSERT((htab_raddr & (htab_bytes - 1)) == 0);
+
+ d->arch.htab.order = order;
+ d->arch.htab.log_num_ptes = log_htab_bytes - LOG_PTE_SIZE;
+ d->arch.htab.sdr1 = htab_calc_sdr1(htab_raddr, log_htab_bytes);
+ d->arch.htab.map = (union pte *)htab_raddr;
+ }
+ return htab_raddr;
+}
+
+static void htab_free(struct domain *d)
+{
+ ulong htab_raddr = GET_HTAB(d);
+
+ free_xenheap_pages((void *)htab_raddr, d->arch.htab.order);
+}
+
+unsigned int shadow_teardown(struct domain *d)
+{
+ htab_free(d);
+ return 0;
+}
+
+unsigned int shadow_set_allocation(struct domain *d,
+ unsigned int megabytes,
+ int *preempted)
+{
+ uint pages;
+ uint p;
+ uint order;
+ ulong addr;
+
+ if (d->arch.htab.order)
+ return -EBUSY;
+
+ if (megabytes == 0) {
+ /* old management tools */
+ megabytes = 1; /* 1/64th of 64M */
+ printk("%s: Fix management tools to set and get shadow/htab values\n"
+ " using %d MiB htab\n",
+ __func__, megabytes);
+ }
+ pages = megabytes << (20 - PAGE_SHIFT);
+ order = fls(pages) - 1; /* log2 truncated */
+ if (pages & ((1 << order) - 1))
+ ++order; /* round up */
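+ /* e.g. (assuming 4KiB pages): megabytes == 5 gives pages == 1280;
+ * fls(1280) - 1 == 10 and 1280 & 1023 != 0, so order rounds up to
+ * 11, i.e. 2048 pages (8MiB) */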
+
+ addr = htab_alloc(d, order);
+ if (addr == 0)
+ return -ENOMEM;
+
+ printk("%s: ibm,fpt-size should be: 0x%x\n", __func__,
+ d->arch.htab.log_num_ptes + LOG_PTE_SIZE);
+
+ /* XXX make this a continuation */
+ /* zero every page actually allocated (htab_alloc may clamp the
+ * requested order) */
+ for (p = 0; p < (1 << d->arch.htab.order); p++)
+ clear_page((void *)(addr + (p << PAGE_SHIFT)));
+
+ return 0;
+}
+
int shadow_control_op(struct domain *d,
dom0_shadow_control_t *sc,
XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
switch ( sc->op )
{
case DOM0_SHADOW_CONTROL_OP_OFF:
- return 0;
+ DPRINTK("Shadow is mandatory!\n");
+ return -EINVAL;
case DOM0_SHADOW2_CONTROL_OP_GET_ALLOCATION:
- sc->mb = 0;
- return 0;
- case DOM0_SHADOW2_CONTROL_OP_SET_ALLOCATION:
- if (sc->mb > 0) {
- BUG();
- return -ENOMEM;
- }
+ sc->mb = shadow_get_allocation(d);
return 0;
+ case DOM0_SHADOW2_CONTROL_OP_SET_ALLOCATION: {
+ int rc;
+ int preempted = 0;
+
+ rc = shadow_set_allocation(d, sc->mb, &preempted);
+
+ if (preempted)
+ /* Not finished. Set up to re-run the call. */
+ rc = hypercall_create_continuation(
+ __HYPERVISOR_dom0_op, "h", u_dom0_op);
+ else
+ /* Finished. Return the new allocation */
+ sc->mb = shadow_get_allocation(d);
+ return rc;
+ }
+
default:
printk("Bad shadow op %u\n", sc->op);
BUG();
union pte *map; /* access the htab like an array */
ulong *shadow; /* idx -> logical translation array */
};
-
-struct domain;
-extern void htab_alloc(struct domain *d, uint order);
-extern void htab_free(struct domain *d);
#endif
extern int shadow_control_op(struct domain *d,
dom0_shadow_control_t *sc,
XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op);
+extern unsigned int shadow_teardown(struct domain *d);
+extern unsigned int shadow_set_allocation(
+ struct domain *d, unsigned int megabytes, int *preempted);
+
+/* Return the size of the shadow2 pool in MiB; exact for any htab of
+ * 1MiB or more (smaller htabs report 0) */
+static inline unsigned int shadow_get_allocation(struct domain *d)
+{
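+ /* e.g. (assuming 4KiB pages): order 11 -> (1ULL << 23) >> 20 == 8 */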
+ return (1ULL << (d->arch.htab.order + PAGE_SHIFT)) >> 20;
+}
#endif